Overview

1. Loading the dataset 
2. Summarizing the dataset
3. Visulizing the dataset
4. Evaluating some Alogorithms
5. Making some predictions

In [107]:
# Importing libraries
from pandas import read_csv
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from pandas.tools.plotting import scatter_matrix
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score

from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC

from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix

In [36]:
from sklearn.datasets import load_iris
data = load_iris()

In [38]:
# Load dataset

filename = 'D:\Machine Learning Mastery -Python\code\chapter_19\iris.data.csv'
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = read_csv(filename, names=names)

In [42]:
# Summarise 
# Descriptive Statistics
print(dataset.shape)
print(dataset.head(5))
print(dataset.describe())
print(dataset.groupby('class').size())


(150, 5)
   sepal-length  sepal-width  petal-length  petal-width        class
0           5.1          3.5           1.4          0.2  Iris-setosa
1           4.9          3.0           1.4          0.2  Iris-setosa
2           4.7          3.2           1.3          0.2  Iris-setosa
3           4.6          3.1           1.5          0.2  Iris-setosa
4           5.0          3.6           1.4          0.2  Iris-setosa
       sepal-length  sepal-width  petal-length  petal-width
count    150.000000   150.000000    150.000000   150.000000
mean       5.843333     3.054000      3.758667     1.198667
std        0.828066     0.433594      1.764420     0.763161
min        4.300000     2.000000      1.000000     0.100000
25%        5.100000     2.800000      1.600000     0.300000
50%        5.800000     3.000000      4.350000     1.300000
75%        6.400000     3.300000      5.100000     1.800000
max        7.900000     4.400000      6.900000     2.500000
class
Iris-setosa        50
Iris-versicolor    50
Iris-virginica     50
dtype: int64

In [43]:
# Data Visualization

In [57]:
# Box and Whisker Plots
dataset.plot(kind='box', subplots=True,layout =(2,2),sharex=False )
pyplot.show()



In [69]:
# Histograms
dataset.hist()
pyplot.show()



In [66]:
scatter_matrix(dataset)
pyplot.show()



In [70]:

Evaluate Some Alogorithms

1. Separate out a validation dataset
2. Setup the test harness to use 10-fold cross-validation
3. Build 5 different models to predict species from flower measurements
4. Select the best model

In [91]:
# Prepare Data
array = dataset.values
X = array[:,0:4]
Y = array[:,4]
validation_size=0.20
seed =7
X_train,X_Validation, Y_train,Y_Validation = train_test_split(X,Y,test_size=validation_size,random_state = seed)

In [94]:
# Spot -Check Algorithms
models = []
models.append(('LR',LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART',DecisionTreeClassifier()))
models.append(('NB',GaussianNB()))
models.append(('SVM',SVC()))


# Evaluate each model in turn
results = []
names = []

for name, model in models:
    kfold = KFold(n_splits=10,random_state=seed)
    cv_results = cross_val_score(model,X_train, Y_train,cv=kfold,scoring ='accuracy')
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" %(name,cv_results.mean(),cv_results.std())
    print(msg)


LR: 0.966667 (0.040825)
LDA: 0.975000 (0.038188)
KNN: 0.983333 (0.033333)
CART: 0.975000 (0.038188)
NB: 0.975000 (0.053359)
SVM: 0.991667 (0.025000)

In [95]:
# Compare Algorithms
fig = pyplot.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(names)
pyplot.show()



In [110]:
from sklearn.metrics import classification_report
# Make Predictions
knn = KNeighborsClassifier()
knn.fit(X_train,Y_train)
predictions = knn.predict(X_Validation)
print(accuracy_score(Y_Validation,predictions))
print(confusion_matrix(Y_Validation,predictions))
print(classification_report(Y_Validation,predictions))


0.9
[[ 7  0  0]
 [ 0 11  1]
 [ 0  2  9]]
                 precision    recall  f1-score   support

    Iris-setosa       1.00      1.00      1.00         7
Iris-versicolor       0.85      0.92      0.88        12
 Iris-virginica       0.90      0.82      0.86        11

    avg / total       0.90      0.90      0.90        30


In [ ]: